## The basic files and libraries needed for most presentations
# creates the libraries and common-functions sections
read_chunk("../common/utility_functions.R")

require(ggplot2) #for plots
require(lattice) # nicer scatter plots
require(plyr) # for processing data.frames
require(grid) # contains the arrow function
require(biOps) # for basic image processing
require(doMC) # for parallel code
require(png) # for reading png images
require(gridExtra)
require(reshape2) # for the melt function
## To install EBImage
# source("http://bioconductor.org/biocLite.R")
# biocLite("EBImage")
require(EBImage) # for more image processing
used.libraries<-c("ggplot2","lattice","plyr","reshape2","grid","gridExtra","biOps","png","EBImage")

# start parallel environment
registerDoMC()
# functions for converting images back and forth
im.to.df<-function(in.img,out.col="val") {
  out.im<-expand.grid(x=1:nrow(in.img),y=1:ncol(in.img))
  out.im[,out.col]<-as.vector(in.img)
  out.im
}
df.to.im<-function(in.df,val.col="val",inv=F) {
  in.vals<-in.df[[val.col]]
  if(is.logical(in.vals)) in.vals<-as.integer(in.vals*255)
  if(inv) in.vals<-255-in.vals
  out.mat<-matrix(in.vals,nrow=length(unique(in.df$x)),byrow=F)
  attr(out.mat,"type")<-"grey"
  out.mat
}
ddply.cutcols<-function(...,cols=1) {
  # run standard ddply command 
  cur.table<-ddply(...)
  cutlabel.fixer<-function(oVal) {
    sapply(oVal,function(x) {
      cnv<-as.character(x)
      mean(as.numeric(strsplit(substr(cnv,2,nchar(cnv)-1),",")[[1]]))
    })
  }
  cutname.fixer<-function(c.str) {
    s.str<-strsplit(c.str,"(",fixed=T)[[1]]
    t.str<-strsplit(paste(s.str[c(2:length(s.str))],collapse="("),",")[[1]]
    paste(t.str[1:(length(t.str)-1)],collapse=",")
  }
  for(i in c(1:cols)) {
    cur.table[,i]<-cutlabel.fixer(cur.table[,i])
    names(cur.table)[i]<-cutname.fixer(names(cur.table)[i])
  }
  cur.table
}

show.pngs.as.grid<-function(file.list,title.fun,zoom=1) {
  preparePng<-function(x) rasterGrob(readPNG(x,native=T,info=T),width=unit(zoom,"npc"),interp=F)
  labelPng<-function(x,title="junk") (qplot(1:300, 1:300, geom="blank",xlab=NULL,ylab=NULL,asp=1)+
                                        annotation_custom(preparePng(x))+
                                        labs(title=title)+theme_bw(24)+
                                        theme(axis.text.x = element_blank(),
                                              axis.text.y = element_blank()))
  imgList<-llply(file.list,function(x) labelPng(x,title.fun(x)) )
  do.call(grid.arrange,imgList)
}
## Standard image processing tools which I use for visualizing the examples in the script
commean.fun<-function(in.df) {
  ddply(in.df,.(val), function(c.cell) {
    weight.sum<-sum(c.cell$weight)
    data.frame(xv=mean(c.cell$x),
               yv=mean(c.cell$y),
               xm=with(c.cell,sum(x*weight)/weight.sum),
               ym=with(c.cell,sum(y*weight)/weight.sum)
    )
  })
}

colMeans.df<-function(x,...) as.data.frame(t(colMeans(x,...)))

pca.fun<-function(in.df) {
  ddply(in.df,.(val), function(c.cell) {
    c.cell.cov<-cov(c.cell[,c("x","y")])
    c.cell.eigen<-eigen(c.cell.cov)
    
    c.cell.mean<-colMeans.df(c.cell[,c("x","y")])
    out.df<-cbind(c.cell.mean,
                  data.frame(vx=c.cell.eigen$vectors[1,],
                             vy=c.cell.eigen$vectors[2,],
                             vw=sqrt(c.cell.eigen$values),
                             th.off=atan2(c.cell.eigen$vectors[2,],c.cell.eigen$vectors[1,]))
    )
  })
}
vec.to.ellipse<-function(pca.df) {
  ddply(pca.df,.(val),function(cur.pca) {
    # assume there are two vectors now
    create.ellipse.points(x.off=cur.pca[1,"x"],y.off=cur.pca[1,"y"],
                          b=sqrt(5)*cur.pca[1,"vw"],a=sqrt(5)*cur.pca[2,"vw"],
                          th.off=pi/2-atan2(cur.pca[1,"vy"],cur.pca[1,"vx"]),
                          x.cent=cur.pca[1,"x"],y.cent=cur.pca[1,"y"])
  })
}

# test function for ellipse generation
# ggplot(ldply(seq(-pi,pi,length.out=100),function(th) create.ellipse.points(a=1,b=2,th.off=th,th.val=th)),aes(x=x,y=y))+geom_path()+facet_wrap(~th.val)+coord_equal()
create.ellipse.points<-function(x.off=0,y.off=0,a=1,b=NULL,th.off=0,th.max=2*pi,pts=36,...) {
  if (is.null(b)) b<-a
  th<-seq(0,th.max,length.out=pts)
  data.frame(x=a*cos(th.off)*cos(th)+b*sin(th.off)*sin(th)+x.off,
             y=-1*a*sin(th.off)*cos(th)+b*cos(th.off)*sin(th)+y.off,
             id=as.factor(paste(x.off,y.off,a,b,th.off,pts,sep=":")),...)
}
deform.ellipse.draw<-function(c.box) {
  create.ellipse.points(x.off=c.box$x[1],
                        y.off=c.box$y[1],
                        a=c.box$a[1],
                        b=c.box$b[1],
                        th.off=c.box$th[1],
                        col=c.box$col[1])                    
}
bbox.fun<-function(in.df) {
  ddply(in.df,.(val), function(c.cell) {
    c.cell.mean<-colMeans.df(c.cell[,c("x","y")])
    xmn<-emin(c.cell$x)
    xmx<-emax(c.cell$x)
    ymn<-emin(c.cell$y)
    ymx<-emax(c.cell$y)
    out.df<-cbind(c.cell.mean,
                  data.frame(xi=c(xmn,xmn,xmx,xmx,xmn),
                             yi=c(ymn,ymx,ymx,ymn,ymn),
                             xw=xmx-xmn,
                             yw=ymx-ymn
                  ))
  })
}

# since the edge of the pixel is 0.5 away from the middle of the pixel
emin<-function(...) min(...)-0.5
emax<-function(...) max(...)+0.5
extents.fun<-function(in.df) {
  ddply(in.df,.(val), function(c.cell) {
    c.cell.mean<-colMeans.df(c.cell[,c("x","y")])
    out.df<-cbind(c.cell.mean,data.frame(xmin=c(c.cell.mean$x,emin(c.cell$x)),
                                         xmax=c(c.cell.mean$x,emax(c.cell$x)),
                                         ymin=c(emin(c.cell$y),c.cell.mean$y),
                                         ymax=c(emax(c.cell$y),c.cell.mean$y)))
  })
}

common.image.path<-"../common"
qbi.file<-function(file.name) file.path(common.image.path,"figures",file.name)
qbi.data<-function(file.name) file.path(common.image.path,"data",file.name)

th_fillmap.fn<-function(max.val) scale_fill_gradientn(colours=rainbow(10),limits=c(0,max.val))

Quantitative Big Imaging

author: Kevin Mader
date: 12 March 2015
width: 1440
height: 900
css: ../common/template.css
transition: rotate

ETHZ: 227-0966-00L

Advanced Segmentation and Labeling

Course Outline

source('../common/schedule.R')

Literature / Useful References

Advanced Segmentation


Contouring

Lesson Outline

What we covered last time

Where segmentation fails: Inconsistent Illumination

With inconsistent or ever-changing illumination it may not be possible to apply the same threshold to every image.

cellImage<-im.to.df(jpeg::readJPEG(qbi.file("Cell_Colony.jpg")))
max.il<-2.5
il.vals<-runif(9,min=1/max.il,max=max.il)
im.vals<-ldply(1:length(il.vals),function(il.idx,th.val=0.75)
  cbind(cellImage[,c("x","y")],
        val=cellImage$val*il.vals[il.idx],
        in.thresh=ifelse(cellImage$val*il.vals[il.idx]<th.val,"Cells","Background"),
        il.idx=il.idx))
Cell Colony with Different Illuminations


ggplot(subset(im.vals,in.thresh=="Cells"),aes(x=x,y=y))+
  geom_raster(fill="red")+facet_wrap(~il.idx)+
  labs(fill="Phase",title="Cell Phase")+
  theme_bw(20)+coord_equal()
Different Illuminations with Constant Threshold

Where segmentation fails: Canaliculi

Bone Slice

Here is a bone slice

  1. Find the larger cellular structures (osteocyte lacunae)
  2. Find the small channels which connect them together

The first task

is easy using a threshold and size criteria (we know how big the cells should be)

The second

is much more difficult because the small channels, with radii on the same order as the pixel size, are obscured by partial volume effects and noise.

Where segmentation fails: Brain Cortex

alz.df<-im.to.df(t(png::readPNG("ext-figures/cortex.png")))
ggplot(alz.df,aes(x=x,y=518-y))+
  geom_raster(aes(fill=val))+
  labs(fill="Electron Density",y="y",x="x")+
  coord_equal()+
  theme_bw(20)
Brain with Threshold


Automated Threshold Selection

Many possible automated techniques


Given that applying a threshold is such a common and significant step, many tools have been developed to perform it automatically (unsupervised). This is particularly important in setups where images are rarely consistent, such as outdoor imaging with varying lighting (sun, clouds). The methods are based on several basic principles.

Automated Methods

Histogram-based methods

Just as we visually inspect a histogram, an algorithm can examine the histogram and find local minima between two peaks, points of maximum or minimum entropy, and other criteria
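
As a minimal sketch of one such histogram rule, here is an Otsu-style threshold finder which maximizes the between-class variance of the gray-value histogram (gray values are assumed to lie in [0,1] and the bin count is an arbitrary choice):

# minimal Otsu-style sketch: pick the cut maximizing between-class variance
otsu.thresh<-function(vals,n.bins=256) {
  h<-hist(vals,breaks=seq(0,1,length.out=n.bins+1),plot=F)
  p<-h$counts/sum(h$counts)             # bin probabilities
  w0<-cumsum(p)                         # background weight at each cut
  mu0<-cumsum(p*h$mids)/pmax(w0,1e-12)  # background mean
  mu.t<-sum(p*h$mids)                   # global mean
  w1<-1-w0                              # foreground weight
  mu1<-(mu.t-w0*mu0)/pmax(w1,1e-12)     # foreground mean
  sb2<-w0*w1*(mu0-mu1)^2                # between-class variance
  h$mids[which.max(sb2)]
}
otsu.thresh(cellImage$val)              # e.g. on the cell colony image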

Image based Methods

These look at the statistics of the thresholded image itself (like entropy) to estimate the threshold

Results-based Methods

These search for a threshold which delivers the desired results in the final objects. For example, if you know you have an image of cells and each cell is between 200 and 10000 pixels, the algorithm tries thresholds until the objects are of the desired size
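
A sketch of such a search, assuming dark objects (as in the cell colony image) and using EBImage's bwlabel for connected-component labeling; the size limits and threshold grid are placeholders:

# results-based sketch: scan thresholds and keep the one where the most
# labeled objects fall inside the expected size range
find.results.thresh<-function(img.mat,min.size=200,max.size=10000,
                              th.vals=seq(0.05,0.95,by=0.05)) {
  scores<-sapply(th.vals,function(th) {
    labels<-bwlabel((img.mat<th)*1)     # label connected dark regions
    sizes<-table(labels[labels>0])      # object sizes in pixels
    if(length(sizes)==0) return(0)
    mean(sizes>=min.size & sizes<=max.size)
  })
  th.vals[which.max(scores)]
}
find.results.thresh(df.to.im(cellImage))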

Fiji -> Adjust -> Auto Threshold

There are many methods and they can be complicated to implement yourself. FIJI offers many of them as built-in functions so you can automatically try all of them on your image

Pitfalls

While an incredibly useful tool, there are many potential pitfalls to these automated techniques.

Histogram-based

These methods are very sensitive to the distribution of pixels in your image: they may work really well on images with equal amounts of each phase, but work horribly on images which have very high amounts of one phase compared to the others

Image-based

These methods are sensitive to noise and a large noise content in the image can change statistics like entropy significantly.

Results-based

These methods are inherently biased by the expectations you have. If you want to find objects between 200 and 1000 pixels you will; they just might not be anything meaningful.

Realistic Approaches for Dealing with these Shortcomings

Imaging science rarely represents the ideal world and will never be 100% perfect. At some point we need to write our master's thesis, defend it, or publish a paper. These are approaches for a more qualitative assessment; we will later cover how to do this more robustly with quantitative approaches

Model-based

One approach is to try to simulate everything (including noise) as well as possible, apply the techniques to many realizations of the same image, and qualitatively keep track of how often the results accurately identify your phase. Hint: >95% seems to convince most biologists
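
A sketch of this idea on the cell colony image; the noise level (sd=0.1), the threshold (0.75), and the number of realizations are invented for illustration:

# simulate many noise realizations and track how often a fixed threshold
# reproduces the noise-free reference segmentation
ref.seg<-cellImage$val<0.75                         # reference phase
agreement<-sapply(1:20,function(i) {
  noisy<-cellImage$val+rnorm(length(cellImage$val),sd=0.1)
  mean((noisy<0.75)==ref.seg)                       # pixel-wise agreement
})
summary(agreement)   # consistently >95% agreement would be reassuring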

Sample-based

Apply the methods to each sample and keep track of which threshold was used for each one. Then go back, apply each threshold to every sample, and keep track of how many of the results are correct enough to be used for further study.

Worst-case Scenario

Come up with the worst-case scenario (noise, misalignment, etc.) and assess how unacceptable the results are. Then try to estimate the interquartile range (the middle 25%-75% of images).

Hysteresis Thresholding

For some images a single threshold does not work

ImageJ Source

bone.df<-im.to.df(png::readPNG("ext-figures/bonegfiltslice.png"))
ggplot(bone.df,aes(x=x,y=y))+
  geom_raster(aes(fill=val))+
  labs(fill="Calcification Dens",y="y",x="x")+
  coord_equal()+
  theme_bw(20)
Bone Slice


ggplot(bone.df,aes(x=val))+
  geom_histogram(aes(y=..density..),alpha=0.5)+
  geom_density()+scale_y_sqrt()+
  labs(x="Calcification Dens")+
  theme_bw(20)
Bone Slice Histogram

thresh.fun<-function(x) {ifelse(x<0.01,"Air",ifelse(x<0.30,"Between","Bone"))}
bone.df$phase<-thresh.fun(bone.df$val)
ggplot(bone.df,aes(x=val))+
  geom_histogram(aes(fill=phase),binwidth=0.02,alpha=0.5)+
  geom_density(aes(y=15000/1.5*..scaled..))+
  labs(x="Calcification Density (au)")+
  scale_y_sqrt()+#(c(0,20))+
  theme_bw(20)
Bone Labeled Histogram

Hysteresis Thresholding

Comparing the original image with the three phases

ggplot(bone.df,aes(x=x,y=y))+
  geom_raster(aes(fill=val))+
  labs(fill="Calcification Dens",y="y",x="x")+
  coord_equal()+
  theme_bw(20)
Bone Slice


ggplot(bone.df,aes(x=x,y=y))+
  geom_raster(aes(fill=phase))+
  labs(fill="Phase",y="y",x="x")+
  coord_equal()+
  theme_bw(20)
Bone Slice

Hysteresis Thresholding: Reducing Pixels

Now we apply two important steps. The first is to remove the objects which are not cells (too small) using an opening operation.

air.im<-df.to.im(cbind(bone.df,isair=bone.df$phase=="Air"),"isair")
air.df<-im.to.df(opening(air.im,makeBrush(5,"disc")))
names(air.df)[3]<-"stillair"
nbone.df<-merge(air.df,bone.df)
ggplot(nbone.df,aes(x=x,y=y))+
  geom_raster(aes(fill=phase,alpha=stillair))+
  labs(fill="Phase",y="y",x="x",title="After Opening")+
  coord_equal()+guides(alpha=F)+
  theme_bw(20)
Bone Slice
# if it's air, make sure it's still air; otherwise demote it to between
# if it's not air, leave it alone
nbone.df$phase<-ifelse(nbone.df$phase=="Air",
       ifelse(nbone.df$stillair>0,"Air","Between"),
       nbone.df$phase)


The second step is to keep the 'Between' pixels which are connected (by looking again at a neighborhood \(\mathcal{N}\)) to the air voxels and to ignore the other ones. This goes back to our original supposition that the smaller structures are connected to the larger structures

# incredibly low performance implementation (please do not copy)
bone.idf<-nbone.df
bet.pts<-nbone.df
# run while there is still new air being created
while(nrow(subset(bet.pts,phase=="Air"))>0) {
  air.pts<-subset(bone.idf,phase=="Air")[,c("x","y")]
  bone.pts<-subset(bone.idf,phase=="Bone")[,c("x","y")]
  bet.pts<-ddply(subset(bone.idf,phase=="Between"),.(x,y),function(in.pixel.lst) {
    in.pixel<-in.pixel.lst[1,]
    data.frame(phase=ifelse(min(with(air.pts,(in.pixel$x-x)^2+(in.pixel$y-y)^2))<=1,
         "Air",
         "Between"))
  })
  bone.idf<-rbind(bet.pts,
                  cbind(air.pts,phase="Air"),
                  cbind(bone.pts,phase="Bone"))
  print(nrow(subset(bet.pts,phase=="Air")))
}
ggplot(bone.idf,aes(x=x,y=y))+
  geom_raster(aes(fill=phase))+
  labs(fill="Phase",y="y",x="x")+
  coord_equal()+
  theme_bw(20)
Bone Slice
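
The loop above is deliberately naive; a much faster equivalent is morphological reconstruction: grow the 'Air' seed into the non-bone mask by repeated dilation until nothing changes. A sketch using EBImage (the 3x3 box brush approximates, but is not identical to, the distance-1 neighborhood used above):

# sketch: morphological reconstruction of the "Air" seed inside the
# non-bone mask (much faster than the pixel-by-pixel loop above)
ord<-order(nbone.df$y,nbone.df$x)      # restore column-major grid order
n.x<-length(unique(nbone.df$x))
seed<-matrix(as.numeric(nbone.df$phase[ord]=="Air"),nrow=n.x)
mask<-matrix(as.numeric(nbone.df$phase[ord]!="Bone"),nrow=n.x)
kern<-makeBrush(3,"box")
repeat {
  grown<-pmin(dilate(seed,kern),mask)  # grow the air, clipped to the mask
  if(all(grown==seed)) break           # converged: no new air pixels
  seed<-grown
}
reached<-numeric(nrow(nbone.df)); reached[ord]<-as.vector(seed)
nbone.df$phase.fast<-ifelse(reached>0,"Air",
                            ifelse(nbone.df$phase=="Bone","Bone","Between"))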

More Complicated Images

As we briefly covered last time, many measurement techniques produce quite rich data.


nx<-4
ny<-4
n.pi<-4
grad.im<-expand.grid(x=c(-nx:nx)/nx*n.pi*pi,
                     y=c(-ny:ny)/ny*n.pi*pi)

grad.im<-cbind(grad.im,
               col=1.5*with(grad.im,abs(cos(x*y))/(abs(x*y)+(3*pi/nx)))+
                 0.5*runif(nrow(grad.im)),
              a=with(grad.im,sqrt(2/(abs(x)+0.5))),
               b=with(grad.im,0.5*sqrt(abs(x)+1)),
              th=0.5*runif(nrow(grad.im)),
              aiso=1,count=1)


# normalize vector
tens.im<-ddply(grad.im,.(x,y),deform.ellipse.draw)

ggplot(tens.im,aes(x=x,y=y,group=as.factor(id),fill=col))+
  geom_polygon(color="black")+coord_fixed(ratio=1)+scale_fill_gradient(low="black",high="white")+guides(fill=F)+
  theme_bw(20)

Feature Vectors

A pairing between spatial information (position) and some other kind of information (value). \[ \vec{x} \rightarrow \vec{f} \]

We are used to seeing images in a grid format where the position indicates the row and column in the grid and the intensity (absorption, reflection, tip deflection, etc) is shown as a different color

basic.image<-im.to.df(matrix(round(runif(5*5,0,100)),nrow=5))
names(basic.image)[3]<-"Intensity"

simple.image<-ggplot(basic.image,aes(x,y,fill=Intensity))+
  geom_tile(color="black")+
  geom_text(aes(label=paste("(",x,",",y,")\n",round(Intensity))))+
  coord_equal()+
  theme_bw(15)

simple.image


The alternative form for this image is as a list of positions and a corresponding value

\[ \hat{I} = (\vec{x},\vec{f}) \]

kable(head(basic.image))
| x | y | Intensity |
|---|---|-----------|
| 1 | 1 |        70 |
| 2 | 1 |        88 |
| 3 | 1 |        23 |
| 4 | 1 |        37 |
| 5 | 1 |        20 |
| 1 | 2 |        72 |

This representation is called a feature vector; in this case it contains only Intensity

Why Feature Vectors

If we use feature vectors to describe our image, we no longer need to worry about how the images will be displayed, and we can approach the segmentation/thresholding problem from a classification standpoint rather than an image-processing one.

Example

So we have an image of a cell and we want to identify the membrane (the ring) from the nucleus (the point in the middle).

ring.image<-mutate(
  expand.grid(x=c(-10:10),y=c(-10:10)),
  Intensity = abs(cos(sqrt(x^2+y^2)/15*6.28))
  )

simple.image<-ggplot(ring.image,aes(x,y,fill=Intensity))+
  geom_tile(color="black")+
  coord_equal()+
  theme_bw(15)

simple.image


A simple threshold doesn't work because we identify the point in the middle as well. We could try to use morphological tricks to get rid of the point in the middle, or we could better tune our segmentation to the ring structure.

ggplot(ring.image,aes(x,y,fill=Intensity))+
  geom_tile(color="black")+
  geom_tile(data=subset(ring.image,Intensity>0.75),aes(color="Threshold"),fill="red",alpha=0.5)+
  coord_equal()+
  labs(color="")+
  theme_bw(15)

Adding a new feature

In this case we add a very simple feature to the image: the distance from the center of the image (Distance).

ring.image.dist<-ddply(ring.image,.(x,y),function(in.pts) {
  in.pts$Distance<-with(in.pts,sqrt(
      min(
        x^2+y^2
      )
    ))
  in.pts
})

ggplot(ring.image.dist,aes(x,y,fill=Distance))+
  geom_tile(color="black")+
  coord_equal()+
  scale_fill_gradientn(colours=rainbow(5))+
  theme_bw(15)

kable(head(ring.image.dist))
|   x |   y | Intensity | Distance |
|-----|-----|-----------|----------|
| -10 | -10 | 0.9350683 | 14.14214 |
| -10 |  -9 | 0.7957197 | 13.45362 |
| -10 |  -8 | 0.6045178 | 12.80625 |
| -10 |  -7 | 0.3876575 | 12.20656 |
| -10 |  -6 | 0.1692429 | 11.66190 |
| -10 |  -5 | 0.0315481 | 11.18034 |

We now have a more complicated image, which we can't as easily visualize, but we can incorporate these two pieces of information together.

ggplot(ring.image.dist,aes(y=Distance,x=Intensity))+
  geom_density2d()+
  geom_point(aes(color=Distance,size=Intensity))+
  scale_color_gradientn(colours=rainbow(5))+
  labs(color="Distance",size="Intensity")+
  theme_bw(20)

Applying two criteria

Now instead of trying to find the intensity for the ring alone, we can combine intensity and distance to identify it

\[ \textrm{iff}\ (5<\textrm{Distance}<10 \ \&\ 0.5<\textrm{Intensity}<1.0) \]

ggplot(ring.image.dist,aes(y=Distance,x=Intensity))+
  geom_density2d(aes(color="Density"))+
  geom_point()+
  geom_rect(aes(color="Threshold"),xmin=0.5,xmax=1,ymin=5,ymax=10,fill=NA)+
  labs(color="")+
  theme_bw(20)


ring.image.seg<-mutate(ring.image.dist,
                       Segmented = Intensity>0.5 & Distance>5 & Distance<10)
ggplot(ring.image,aes(x,y,fill=Intensity))+
  geom_tile(color="black")+
  geom_tile(data=subset(ring.image.seg,Segmented),
            aes(color="Threshold"),fill="red",alpha=0.5)+
  coord_equal()+
  labs(color="")+
  theme_bw(15)

Common Features

The distance, while illustrative, is not a commonly used feature; more common are various filters applied to the image
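
The chunk that computes merged.ring.image is not shown here; the following is a hypothetical reconstruction of such filter features using EBImage's filter2 (the kernel sizes, sigma, and lack of normalization are assumptions, so the values will not exactly match the table below):

ring.mat<-df.to.im(ring.image,val.col="Intensity")  # image as a matrix
sobel.x<-matrix(c(-1,0,1,-2,0,2,-1,0,1),nrow=3)     # Sobel kernel
gx<-filter2(ring.mat,sobel.x)                       # horizontal gradient
gy<-filter2(ring.mat,t(sobel.x))                    # vertical gradient
feat.df<-cbind(ring.image[,c("x","y")],
               Sobel=as.vector(sqrt(gx^2+gy^2)),    # gradient magnitude
               Gaussian=as.vector(filter2(ring.mat,
                                          makeBrush(5,"Gaussian",sigma=1))))
merged.ring.image<-merge(ring.image.seg,feat.df)    # join on x and y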

kable(head(merged.ring.image[,c("x","y","Intensity","Sobel","Gaussian")]),digits=2)
| x |  y | Intensity | Sobel | Gaussian |
|---|----|-----------|-------|----------|
| 1 |  1 | 0.94 | 0.32 | 0.53 |
| 1 | 10 | 0.48 | 0.50 | 0.45 |
| 1 | 11 | 0.50 | 0.50 | 0.46 |
| 1 | 12 | 0.48 | 0.64 | 0.46 |
| 1 | 13 | 0.43 | 0.78 | 0.45 |
| 1 | 14 | 0.33 | 0.94 | 0.42 |

mfeat.vec<-melt(merged.ring.image,id.vars=c("x","y","Segmented","Distance"))
ggplot(mfeat.vec,
       aes(x=x,y=y,fill=value))+
  geom_tile(size=1,alpha=0.75)+
  scale_fill_gradientn(colours=rainbow(5))+
  coord_equal()+
  labs(fill="")+
  facet_grid(variable~.)+
  theme_bw(20)

Analyzing the feature vector

The distributions of the features appear very different and can thus likely be used for identifying different parts of the images.

ggplot(mfeat.vec,aes(x=value,color=variable))+
  geom_density()+
  scale_y_sqrt()+
  labs(x="Feature Value",color="Feature")+
  theme_bw(20)


Combine this with our a priori information (called supervised analysis)

ggplot(mutate(mfeat.vec,Structure=ifelse(Segmented,"Ring","Background")),
       aes(x=value,color=Structure))+
  geom_density()+
  labs(x="Feature Value",color="Structure")+
  facet_grid(variable~.,scales="free")+
  theme_bw(12)

splom(merged.ring.image[,c("Intensity","Sobel","Gaussian")],groups=merged.ring.image$Segmented,pch=16)

K-Means Clustering / Classification (Unsupervised)

K-Means Example

Input

|   | x |  y | Intensity | Sobel | Gaussian |
|---|---|----|-----------|-------|----------|
| 1 | 1 |  1 | 0.94 | 0.32 | 0.53 |
| 2 | 1 | 10 | 0.48 | 0.50 | 0.45 |
| 3 | 1 | 11 | 0.50 | 0.50 | 0.46 |
| 4 | 1 | 12 | 0.48 | 0.64 | 0.46 |
| 5 | 1 | 13 | 0.43 | 0.78 | 0.45 |
| 6 | 1 | 14 | 0.33 | 0.94 | 0.42 |

\[\downarrow\]

Output

|    |  x |  y | Intensity | Sobel | Gaussian |
|----|----|----|-----------|-------|----------|
| 20 |  1 |  8 | 0.33 | 0.50 | 0.40 |
| 21 |  1 |  9 | 0.43 | 0.50 | 0.42 |
| 22 | 10 |  1 | 0.48 | 0.14 | 0.45 |
| 23 | 10 | 10 | 0.83 | 0.50 | 0.42 |
| 24 | 10 | 11 | 0.91 | 0.50 | 0.36 |

|     |  x |  y | Intensity | Sobel | Gaussian |
|-----|----|----|-----------|-------|----------|
| 100 | 13 |  4 | 1.00 | 0.16 | 0.49 |
| 101 | 13 |  5 | 0.88 | 0.74 | 0.49 |
| 102 | 13 |  6 | 0.63 | 0.96 | 0.52 |
| 103 | 13 |  7 | 0.30 | 0.94 | 0.55 |
| 104 | 13 |  8 | 0.06 | 0.00 | 0.55 |

K-Means Algorithm

We give as an initial parameter the number of groups we want to find and possibly a criterion for removing groups that are too similar

  1. Randomly create center points (groups) in vector space
  2. Assign each data point to the “closest” center
  3. Recalculate the centers as the mean point of each group
  4. Go back to step 2 until the groups stop changing

What vector space do we have?

Note: If you look for N groups you will almost always find N groups with K-Means, whether or not they make any sense
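
A minimal from-scratch sketch of these four steps (plain R with Euclidean distances; a fixed iteration count stands in for a proper convergence test and empty clusters are not handled):

# minimal K-means sketch: fm is an n x d feature matrix, k the group count
simple.kmeans<-function(fm,k,iters=20) {
  centers<-fm[sample(nrow(fm),k),,drop=F]        # 1. random initial centers
  for(it in 1:iters) {
    d2<-sapply(1:k,function(j)                   # squared distance to center j
      rowSums((fm-matrix(centers[j,],nrow(fm),ncol(fm),byrow=T))^2))
    grp<-max.col(-d2)                            # 2. assign to closest center
    for(j in 1:k)                                # 3. recalculate the centers
      centers[j,]<-colMeans(fm[grp==j,,drop=F])
  }                                              # 4. repeat
  grp
}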

K-Means Example

Continuing with our previous image and applying K-means to the Intensity, Sobel and Gaussian channels looking for 2 groups we find

sring.image<-merged.ring.image[,c("x","y","Intensity","Sobel","Gaussian")]
sring.image$km.cluster<-kmeans(sring.image[,c("Intensity","Sobel","Gaussian")],2)$cluster
sring.vec<-melt(sring.image,id.vars=c("x","y","km.cluster"))
ggplot(sring.vec,
       aes(x=x,y=y,fill=value,color=as.factor(km.cluster)))+
  geom_tile(size=0.75,alpha=0.75)+
  #scale_fill_gradientn(colours=rainbow(5))+
  coord_equal()+
  labs(fill="",color="Cluster")+
  facet_grid(~variable)+
  theme_bw(10)
KMeans

splom(sring.image[,c("Intensity","Sobel","Gaussian")],groups=sring.image$km.cluster,pch=16)
Variable distributions


Looking for 5 groups

sring.image$km.cluster<-kmeans(sring.image[,c("Intensity","Sobel","Gaussian")],5)$cluster
sring.vec<-melt(sring.image,id.vars=c("x","y","km.cluster"))
ggplot(sring.vec,
       aes(x=x,y=y,fill=value,color=as.factor(km.cluster)))+
  geom_tile(size=0.75,alpha=0.75)+
  #scale_fill_gradientn(colours=rainbow(5))+
  coord_equal()+
  labs(fill="",color="Cluster")+
  facet_grid(~variable)+
  theme_bw(10)
KMeans

splom(sring.image[,c("Intensity","Sobel","Gaussian")],groups=sring.image$km.cluster,pch=16)
Variable distributions

Changing the feature vector

Including the position in the features as well

sring.image$km.cluster<-kmeans(sring.image[,c("x","y","Intensity","Sobel","Gaussian")],5)$cluster
sring.vec<-melt(sring.image,id.vars=c("x","y","km.cluster"))
ggplot(sring.vec,
       aes(x=x,y=y,fill=value,color=as.factor(km.cluster)))+
  geom_tile(size=0.75,alpha=0.75)+
  #scale_fill_gradientn(colours=rainbow(5))+
  coord_equal()+
  labs(fill="",color="Cluster")+
  facet_grid(~variable)+
  theme_bw(10)
KMeans

splom(sring.image[,c("x","y","Intensity","Sobel","Gaussian")],groups=sring.image$km.cluster,pch=16)
Variable distributions

Rescaling components

Since the distance is currently calculated by \(||\vec{v}_i-\vec{v}_j||\) and the values for the position are much larger than the values for Intensity, Sobel, or Gaussian, the position needs to be rescaled so all the components fit on the same axis \[\vec{v} = \left\{\frac{x}{20}, \frac{y}{20}, \textrm{Intensity},\textrm{Sobel},\textrm{Gaussian}\right\}\]

sring.image$km.cluster<-kmeans(mutate(
  sring.image[,c("x","y","Intensity","Sobel","Gaussian")],
  x=x/20,y=y/20)
                               ,5)$cluster

sring.vec<-melt(sring.image,id.vars=c("x","y","km.cluster"))
ggplot(sring.vec,
       aes(x=x,y=y,fill=value,color=as.factor(km.cluster)))+
  geom_tile(size=0.75,alpha=0.75)+
  #scale_fill_gradientn(colours=rainbow(5))+
  coord_equal()+
  labs(fill="",color="Cluster")+
  facet_grid(~variable)+
  theme_bw(10)
KMeans


splom(sring.image[,c("x","y","Intensity","Sobel","Gaussian")],groups=sring.image$km.cluster,pch=16)
Variable distributions

Super-pixels

An approach for simplifying images by performing a clustering and forming super-pixels from groups of similar pixels.

shale.array<-readTiff(qbi.file("shale-slice.tiff"))
shale.image<-im.to.df(shale.array[,,1],out.col="Intensity")
ggplot(shale.image,aes(x,y,fill=Intensity))+
  geom_tile()+
  coord_equal()+
  labs(fill="")+
  theme_bw(10)


pos.scale<-5
shale.kmeans.data<-mutate(
  shale.image[,c("x","y","Intensity")],
  x=x/pos.scale,y=y/pos.scale)
# evenly spaced grid
grid.size<-40
grid.center<-round(grid.size/2)
shale.superpixels.centers<-subset(shale.kmeans.data, 
                                  x%%grid.size==grid.center & y%%grid.size==grid.center)
# not the real super pixels algorithm but close enough, see paper and websites
# http://www.kev-smith.com/papers/SLIC_Superpixels.pdf
# http://ivrg.epfl.ch/research/superpixels
shale.kmeans<-kmeans(shale.kmeans.data,
                               centers=shale.superpixels.centers,
                               iter.max=50
  )
shale.image$km.cluster<-shale.kmeans$cluster

ggplot(shale.image,
       aes(x=x,y=y,alpha=Intensity))+
  geom_tile(aes(fill=as.factor(km.cluster)),size=0)+#,alpha=0.75)+
  scale_alpha_continuous(range=c(0.5,1))+
  guides(fill=F)+
  coord_equal()+
  labs(fill="")+
  theme_bw(10)
KMeans

Why use superpixels

Drastically reduced data size, serves as an initial segmentation showing spatially meaningful groups

shale.centers<-mutate(as.data.frame(shale.kmeans$centers),x=pos.scale*x,y=pos.scale*y)
ggplot(shale.centers,
       aes(x,y,color=Intensity))+
  geom_point(size=3)+
  scale_color_gradientn(colours=rainbow(5))+
  labs(x="X",y="Y")+
  coord_equal()+
  theme_bw(20)


ggplot(shale.centers,
       aes(x=Intensity))+
  geom_vline(aes(xintercept=Intensity),alpha=0.02)+
  geom_density(aes(y=..count..))+
  geom_vline(aes(color="Threshold"),color="red",xintercept=c(130,180))+
  labs(y="Number of Superpixels")+
  theme_bw(20)

Segment the superpixels and apply them to the whole image (only a fraction of the data and much smaller datasets)

shale.centers<-mutate(shale.centers,
                      cluster=1:nrow(shale.centers),
                      group=as.factor(ifelse(Intensity<130,"Dark",
                                   ifelse(Intensity<180,"Medium","Light")))
                      )
grp.image<-merge(shale.image,shale.centers[,c("cluster","group")],by.x="km.cluster",by.y="cluster")

ggplot(grp.image,
       aes(x=x,y=y,alpha=Intensity))+
  geom_tile(aes(fill=as.factor(group)),size=0)+#,alpha=0.75)+
  scale_alpha_continuous(range=c(0.5,1))+
  coord_equal()+
  labs(fill="")+
  theme_bw(10)

Superpixels vs Standard Segmentation

Superpixels (0.06% of the original)

ggplot(grp.image,
       aes(x=x,y=y,alpha=Intensity))+
  geom_tile(aes(fill=as.factor(group)),size=0)+#,alpha=0.75)+
  scale_alpha_continuous(range=c(0.5,1))+
  coord_equal()+
  labs(fill="")+
  theme_bw(10)


Original

ggplot(
  mutate(shale.image,group=as.factor(ifelse(Intensity<130,"Dark",
                                   ifelse(Intensity<180,"Medium","Light")))),
       aes(x=x,y=y,alpha=Intensity))+
  geom_tile(aes(fill=as.factor(group)),size=0)+#,alpha=0.75)+
  scale_alpha_continuous(range=c(0.5,1))+
  coord_equal()+
  labs(fill="")+
  theme_bw(10)

Probabilistic Models of Segmentation

A more general approach is to use a probabilistic model for segmentation. We start with our image \(I(\vec{x})\ \forall \vec{x}\in \mathbb{R}^N\) and we classify it into two phases \(\alpha\) and \(\beta\)

\[P(\{\vec{x} , I(\vec{x})\} | \alpha) \propto P(\alpha) + P(I(\vec{x}) | \alpha)+ P(\sum_{x^{\prime} \in \mathcal{N}} I(\vec{x^{\prime}}) | \alpha)\]
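
As a toy realization of this model on the bone slice: the priors, means, and standard deviations below are invented for illustration, the neighborhood term uses the 3x3 mean, and the terms are combined as log-probabilities rather than the literal sum above:

bone.mat<-df.to.im(bone.df)                           # slice as a matrix
nb.mean<-as.vector(filter2(bone.mat,matrix(1/9,3,3))) # mean over a 3x3 N
val<-as.vector(bone.mat)
phase.score<-function(prior,mu,s)                     # prior + two likelihoods
  log(prior)+dnorm(val,mu,s,log=T)+dnorm(nb.mean,mu,s,log=T)
bone.df$prob.phase<-ifelse(phase.score(0.3,0.50,0.15)>  # alpha = "Bone"
                           phase.score(0.7,0.02,0.05),  # beta  = "Air"
                           "Bone","Air")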

Contouring

Expanding on the hole-filling issues examined before, a general problem in imaging is identifying regions of interest within an image.

Convex Hull Approach

This approach takes all of the points in a given slice or volume and finds the smallest convex 2D area or 3D volume (respectively) which encloses all of those points.

test.im<-function(n) mutate(
  data.frame(th=c(0:200)/200.0*2*pi),
  r=cos(th*n),
  x=r*cos(th),
  y=r*sin(th)
  )
pthull<-function(pts,x.col="x",y.col="y") {
  pts.hull<-chull(pts[,c(x.col,y.col)])
  pts[c(pts.hull,pts.hull[1]),c(x.col,y.col)]
}
test.imgs<-ldply(c(1:6),function(n) {
  tdata<-test.im(n)[,c("x","y")]
  cbind(rbind(cbind(tdata,src="pts"),
        cbind(pthull(tdata),src="hull")),n=n)
})

ggplot(subset(test.imgs,src=="pts"),aes(x,y))+
  geom_polygon(data=subset(test.imgs,src=="hull"),aes(fill="convex\nhull"),alpha=0.5)+
  geom_point()+
  labs(fill="")+
  facet_wrap(~n)+
  theme_bw(20)


Depending on the type of sample the convex hull can make sense for filling in the gaps and defining the boundaries for a sample.

test.img<-subset(test.imgs,n==2 & src=="pts")[,c("x","y")]
ggplot(test.img,aes(x,y))+
  geom_polygon(data=pthull(test.img),aes(fill="convex\nhull"),alpha=0.5)+
  geom_point()+
  labs(fill="")+
  theme_bw(20)

The critical shortcoming is that it is very sensitive to single outlier points.

test.img<-rbind(subset(test.imgs,n==2 & src=="pts")[,c("x","y")],data.frame(x=-1,y=-1))
ggplot(test.img,aes(x,y))+
  geom_polygon(data=pthull(test.img),aes(fill="convex\nhull"),alpha=0.5)+
  geom_point()+
  labs(fill="")+
  theme_bw(20)

Convex Hull Example

The convex hull very closely matches the area we would define as 'bone' without requiring any parameter adjustment, resolution-specific tuning, or extensive image processing; for such a sample a convex hull is usually sufficient.

cortbone.im<-imagedata(t(png::readPNG(qbi.file("bone-section.png"))[,,1]),"grey")
cortbone.df<-im.to.df(cortbone.im)
# calculate convex hull
cortbone.chull<-chull(subset(cortbone.df,val>0.5)[,c("x","y")])
cortbone.chull<-c(cortbone.chull,cortbone.chull[1])
cortbone.chull<-subset(cortbone.df,val>0.5)[cortbone.chull,c("x","y")]
ggplot(subset(cortbone.df,val>0.5),aes(x=x,y=518-y))+
  geom_polygon(data=cortbone.chull,aes(fill="convex hull"),alpha=0.5)+
  geom_raster(aes(fill="original"),alpha=0.8)+
  labs(fill="Image",y="y",x="x",title="Convex Hull Creation")+
  coord_equal()+
  theme_bw(20)
Cortical Segment Convex Hull


Here is an example of the convex hull applied to a region of a cortical bone sample. The green shows the bone and the red shows the convex hull. Compared to a visual inspection, the convex hull overestimates the bone area, as we probably would not associate the region where the bone curves to the right with 'bone area'.

cortbone.im<-imagedata(t(png::readPNG(qbi.file("bone.png"))),"grey")
cortbone.df<-im.to.df(cortbone.im)
# calculate convex hull
cortbone.chull<-chull(subset(cortbone.df,val<1)[,c("x","y")])
cortbone.chull<-c(cortbone.chull,cortbone.chull[1])
cortbone.chull<-subset(cortbone.df,val<1)[cortbone.chull,c("x","y")]
ggplot(subset(cortbone.df,val<1),aes(x=x,y=518-y))+
  geom_polygon(data=cortbone.chull,aes(fill="convex hull"),alpha=0.5)+
  geom_raster(aes(fill="original"),alpha=0.8)+
  labs(fill="Image",y="y",x="x",title="Convex Hull Creation")+
  coord_equal()+
  theme_bw(20)
Cortical Segment Convex Hull


Rubber Band

## the rubber band function to fit a boundary around the curve
rubber.band.data<-function(raw.df,binning.pts=10,eval.fun=max) {
  in.df<-raw.df
  # calculate center of mass
  com.val<-colMeans(in.df)
  # add polar coordinates
  in.df$r<-with(in.df,sqrt((x-com.val["x"])^2+(y-com.val["y"])^2))
  in.df$theta<-with(in.df,atan2(y-com.val["y"],x-com.val["x"]))
  # create a maximum path
  outer.path<-ddply.cutcols(in.df,.(cut_interval(theta,binning.pts)),function(c.section) data.frame(r=eval.fun(c.section$r)))
  outer.path$x<-with(outer.path,r*cos(theta)+com.val["x"])
  outer.path$y<-with(outer.path,r*sin(theta)+com.val["y"])
  outer.path
}

Useful for a variety of samples (they needn't be radially symmetric) and offers more flexibility in step size, smoothing function, etc. than the convex hull.

  1. Calculates the center of mass.
  2. Transforms the sample into polar coordinates.
  3. Calculates a piecewise linear fit \(r=f(\theta)\).

ggplot(subset(cortbone.df,val<1),aes(x=x,y=518-y))+
  geom_polygon(data=rubber.band.data(subset(cortbone.df,val<1),9),aes(fill="rubber band   9pts"),alpha=0.5)+
  geom_polygon(data=rubber.band.data(subset(cortbone.df,val<1),36),aes(fill="rubber band  36pts"),alpha=0.5)+
  geom_raster(aes(fill="original"),alpha=0.8)+
  labs(fill="Image",y="y",x="x")+
  coord_equal()+
  theme_bw(20)
First Rubber Bands


ggplot(subset(cortbone.df,val<1),aes(x=x,y=518-y))+
  geom_polygon(data=rubber.band.data(subset(cortbone.df,val<1),90),aes(fill="rubber band  90pts"),alpha=0.5)+
  geom_polygon(data=rubber.band.data(subset(cortbone.df,val<1),180),aes(fill="rubber band 180pts"),alpha=0.5)+
  geom_raster(aes(fill="original"),alpha=0.8)+
  labs(fill="Image",y="y",x="x")+
  coord_equal()+
  theme_bw(20)
Better Rubber Bands

ggplot(subset(cortbone.df,val<1),aes(x=x,y=518-y))+
  geom_polygon(data=rubber.band.data(subset(cortbone.df,val<1),180),aes(fill="rubber band 180pts"),alpha=0.5)+
  geom_polygon(data=rubber.band.data(subset(cortbone.df,val<1),720),aes(fill="rubber band 720pts"),alpha=0.5)+
  geom_raster(aes(fill="original"),alpha=0.8)+
  labs(fill="Image",y="y",x="x")+
  coord_equal()+xlim(50,150)+ylim(-100,50)+
  theme_bw(20)
In Detail

Rubber Band: More flexible constraints

If we use quantiles or the average instead of the maximum value, we can make the method less sensitive to outlier pixels

ggplot(subset(cortbone.df,val<1),aes(x=x,y=518-y))+
  geom_raster(aes(fill="original"),alpha=0.8)+
  geom_polygon(data=rubber.band.data(subset(cortbone.df,val<1),90,eval.fun=mean),
               aes(fill="rubber band mean value"),alpha=0.5)+
  geom_polygon(data=rubber.band.data(subset(cortbone.df,val<1),90,
                                     eval.fun=function(x) quantile(x,0.99)),
               aes(fill="rubber band upper 99%"),alpha=0.5)+

  labs(fill="Image",y="y",x="x")+
  coord_equal()+
  theme_bw(20)
In Detail

Contouring: Manual - Guided Methods

Many forms of guided methods exist, the most popular is known simply as the Magnetic Lasso in Adobe Photoshop (video).

The basic principle behind many of these methods is to optimize a set of user-given points based on local edge-like information in the image. In the brain cortex example, this is the small gradients in the gray values which our eyes naturally separate out as an edge, but which have many gaps and discontinuities.
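
A sketch of that principle: snap each user-supplied point (a data.frame with hypothetical id, x, and y columns) to the strongest edge response within a small search window, where edge.mat would be a gradient-magnitude matrix like the Sobel one sketched earlier and the window size is an arbitrary choice:

snap.to.edge<-function(pts,edge.mat,win=5) {
  ddply(pts,.(id),function(p) {                   # one user point per id
    xs<-max(1,p$x-win):min(nrow(edge.mat),p$x+win)
    ys<-max(1,p$y-win):min(ncol(edge.mat),p$y+win)
    local<-edge.mat[xs,ys,drop=F]
    best<-which(local==max(local),arr.ind=T)[1,]  # strongest edge pixel
    data.frame(x=xs[best[1]],y=ys[best[2]])       # snapped position
  })
}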

Active Contours / Snakes

Beyond

Fuzzy Classification

Fuzzy classification is based on fuzzy logic and fuzzy set theory; it is a general category of multi-valued logic (rather than simply true and false) and can be used to build IF and THEN statements from our probabilistic models.

Instead of

\[P(\{\vec{x} , I(\vec{x})\} | \alpha) \propto P(\alpha) + P(I(\vec{x}) | \alpha)+ P(\sum_{x^{\prime} \in \mathcal{N}} I(\vec{x^{\prime}}) | \alpha)\]


Clear simple rules

which encompass aspects of filtering, thresholding, and morphological operations
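
For instance, a rule like 'IF the value IS dark AND its neighborhood IS dark THEN air' could be sketched as follows for the bone slice; the membership function, its 0.3 cut-off, and the 0.5 defuzzification level are all invented:

mu.dark<-function(v) pmax(0,pmin(1,(0.3-v)/0.3))  # membership in "dark"
fuzzy.and<-function(a,b) pmin(a,b)                # standard fuzzy AND (min)
nb<-as.vector(filter2(df.to.im(bone.df),matrix(1/9,3,3)))
bone.df$air.membership<-fuzzy.and(mu.dark(bone.df$val),mu.dark(nb))
bone.df$fuzzy.air<-bone.df$air.membership>0.5     # defuzzify with a 0.5 cut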

Cell Colony